This notebook is another copy of post #11; however, in this post, I'm modifying my CNN to have dedicated "names" at each layer. The names will be important for my next post because they will allow me to access the filters within each convolutional layer and view them. Nothing really new in this post either, except where I define the convnet layers in TFlearn: you'll see that each of them has a name parameter attached to it.
In [2]:
import cv2
import numpy as np
import pandas as pd
import urllib
import math
import boto3
import os
import copy
from tqdm import tqdm
from matplotlib import pyplot as plt
%matplotlib inline
In [3]:
# Temporarily load from np arrays
# (pre-compressed photo stacks saved in an earlier post; paths are relative
# to the notebook's working directory)
chi_photos_np = np.load('chi_photos_np_0.03_compress.npy')
lars_photos_np = np.load('lars_photos_np_0.03_compress.npy')
In [4]:
# View shape of the loaded numpy array (bare last expression displays it)
chi_photos_np.shape
Out[4]:
In [5]:
# Image width in pixels, taken from the last axis of the photo array
# (assumes the photos are square — TODO confirm height == width)
width = chi_photos_np.shape[-1]
width
Out[5]:
In [6]:
# Try out the scaler on manually set data (min of 0, max of 255)
from sklearn.preprocessing import MinMaxScaler
In [7]:
# Two-point column vector spanning the full uint8 pixel range (0 to 255);
# the MinMaxScaler below is fit on this so real pixels map into [0, 1]
test_list = np.array([[0], [255]])
test_list
Out[7]:
In [8]:
# Initialize scaler
# (MinMaxScaler defaults to feature_range = (0, 1))
scaler = MinMaxScaler()
In [9]:
# Fit on the two-point range so the scaler maps pixel values 0-255 onto 0-1
scaler.fit(test_list)
Out[9]:
In [10]:
# Preview the 4-D (N, width, width, 1) shape the CNN input layer will expect
chi_photos_np.reshape(-1, width, width, 1).shape
Out[10]:
In [11]:
# Reshape to prepare for scaler: a single-feature column (n_samples, 1) so
# it matches the scaler, which was fit on one feature. The previous
# reshape(1, -1) produced a (1, N) row that only worked via broadcasting in
# old scikit-learn; modern versions reject it with a feature-count error.
# (Flattening order is unchanged, so the later reshape back to
# (-1, width, width, 1) yields identical images.)
chi_photos_np_flat = chi_photos_np.reshape(-1, 1)
chi_photos_np_flat[:10]
Out[11]:
In [12]:
# Scale pixel values into [0, 1] using the scaler fit on [0, 255] above
chi_photos_np_scaled = scaler.transform(chi_photos_np_flat)
chi_photos_np_scaled[:10]
Out[12]:
In [13]:
# Reshape lars' photos to a single-feature column (n_samples, 1) to match
# the scaler's fitted shape, then scale into [0, 1]. The previous
# reshape(1, -1) produced a (1, N) row that modern scikit-learn rejects
# with a feature-count mismatch; flattening order is unchanged.
lars_photos_np_flat = lars_photos_np.reshape(-1, 1)
lars_photos_np_scaled = scaler.transform(lars_photos_np_flat)
Now let's reshape.
In [14]:
# Reshape the scaled flat arrays back into 4-D image stacks: (N, width, width, 1)
chi_photos_reshaped = chi_photos_np_scaled.reshape(-1, width, width, 1)
lars_photos_reshaped = lars_photos_np_scaled.reshape(-1, width, width, 1)
for arr_name, arr in (('chi_photos_reshaped', chi_photos_reshaped),
                      ('lars_photos_reshaped', lars_photos_reshaped)):
    print('{} has shape: {}'.format(arr_name, arr.shape))
In [15]:
# Create copy of chi's photos to start populating x_input.
# ndarray.copy() is the idiomatic (and cheaper) equivalent of
# copy.deepcopy for a plain numpy array.
x_input = chi_photos_reshaped.copy()
print('{} has shape: {}'. format('x_input', x_input.shape))
In [16]:
# Concatenate lars' photos onto the existing x_input along the sample axis
x_input = np.concatenate((x_input, lars_photos_reshaped), axis = 0)
print('{} has shape: {}'. format('x_input', x_input.shape))
In [17]:
# One-hot label arrays: chi -> [1, 0], lars -> [0, 1], one row per photo
y_chi = np.array([[1, 0]] * len(chi_photos_reshaped))
y_lars = np.array([[0, 1]] * len(lars_photos_reshaped))
print('{} has shape: {}'. format('y_chi', y_chi.shape))
print('{} has shape: {}'. format('y_lars', y_lars.shape))
In [18]:
# Preview the first few one-hot labels for chi (each row should be [1, 0])
y_chi[:5]
Out[18]:
In [19]:
# Preview the first few one-hot labels for lars (each row should be [0, 1])
y_lars[:5]
Out[19]:
In [20]:
# Create copy of chi's labels to start populating y_input.
# ndarray.copy() is the idiomatic equivalent of copy.deepcopy for a
# plain numpy array.
y_input = y_chi.copy()
print('{} has shape: {}'. format('y_input', y_input.shape))
In [21]:
# Concatenate lars' labels onto the existing y_input along the sample axis
y_input = np.concatenate((y_input, y_lars), axis = 0)
print('{} has shape: {}'. format('y_input', y_input.shape))
In [22]:
# TFlearn libraries
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
In [23]:
# sentdex's code to build the neural net using tflearn
# Input layer --> conv layer w/ max pooling --> conv layer w/ max pooling --> fully connected layer --> output layer
# Every layer is given an explicit name= so its variables (e.g. the conv
# filters) can be looked up by name later — the whole point of this post.
convnet = input_data(shape = [None, width, width, 1], name = 'input')  # single-channel (grayscale) images
convnet = conv_2d(convnet, 32, 10, activation = 'relu', name = 'conv_1')  # 32 filters, 10x10 kernel
convnet = max_pool_2d(convnet, 2, name = 'max_pool_1')  # 2x2 max pooling
convnet = conv_2d(convnet, 64, 10, activation = 'relu', name = 'conv_2')  # 64 filters, 10x10 kernel
convnet = max_pool_2d(convnet, 2, name = 'max_pool_2')
convnet = fully_connected(convnet, 1024, activation = 'relu', name = 'fully_connected_1')
convnet = dropout(convnet, 0.8, name = 'dropout_1')  # NOTE(review): tflearn's dropout arg is keep_prob (80% kept), not a drop rate — confirm
convnet = fully_connected(convnet, 2, activation = 'softmax', name = 'fully_connected_2')  # 2 classes: chi / lars
convnet = regression(convnet, optimizer = 'sgd', learning_rate = 0.01, loss = 'categorical_crossentropy', name = 'targets')
In [24]:
# Import library
# sklearn.cross_validation was deprecated in scikit-learn 0.18 and removed
# in 0.20; train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
In [25]:
# Sanity-check that samples and labels line up before splitting
print(x_input.shape)
print(y_input.shape)
In [26]:
# Perform train test split
# stratify on the one-hot label rows keeps the chi/lars class ratio equal
# in both splits; 10% of the data is held out for validation.
# TODO: pass random_state= for a reproducible split.
x_train, x_test, y_train, y_test = train_test_split(x_input, y_input, test_size = 0.1, stratify = y_input)
In [27]:
# Train with data
# The dict keys 'input' and 'targets' must match the name= values given to
# input_data(...) and regression(...) above.
model = tflearn.DNN(convnet)
model.fit(
{'input': x_train},
{'targets': y_train},
n_epoch = 3,
validation_set = ({'input': x_test}, {'targets': y_test}),
snapshot_step = 500,
show_metric = True
)
In [28]:
# Save model
# NOTE(review): the filename says "4_epochs" but this run used n_epoch = 3 —
# confirm which is intended before reusing the checkpoint name downstream.
model.save('model_4_epochs_0.03_compression_99.6_named.tflearn')